import matplotlib.pyplot as plt
from skimage import io
from matplotlib import cm
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import os, PIL, pathlib, math
from imgaug import augmenters as iaa
from PIL import Image
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# IPython magic: render matplotlib figures inline in the notebook.
%matplotlib inline
# Data-parallel training across all visible GPUs (4 in the captured run below).
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
def show_grid(image_list, nrows, ncols, label_list=None, show_labels=False,
              savename=None, figsize=(10, 10), showaxis='off'):
    """Display a batch of images on an nrows x ncols grid.

    Parameters
    ----------
    image_list : list of 2-D/3-D arrays, or a 4-D batch array (N, H, W, C)
        with C == 1 (grayscale) or C == 3 (RGB); a batch is split into a list.
    nrows, ncols : grid dimensions.
    label_list : integer class labels, one per image (used when show_labels).
    show_labels : if True, title each cell with class_mapping[label_list[i]].
    savename : optional path; when given the figure is also saved to disk.
    figsize : matplotlib figure size in inches.
    showaxis : unused; kept for backward compatibility with existing callers.
    """
    if type(image_list) is not list:
        if image_list.shape[-1] == 1:
            image_list = [image_list[i, :, :, 0] for i in range(image_list.shape[0])]
        elif image_list.shape[-1] == 3:
            image_list = [image_list[i, :, :, :] for i in range(image_list.shape[0])]
    fig = plt.figure(None, figsize, frameon=False)
    grid = ImageGrid(fig, 111,                 # similar to subplot(111)
                     nrows_ncols=(nrows, ncols),
                     axes_pad=0.3,             # pad between axes in inch.
                     share_all=True,
                     )
    # Bug fix: iterate over at most len(image_list) cells so a partially-filled
    # grid no longer raises IndexError.
    for i in range(min(nrows * ncols, len(image_list))):
        ax = grid[i]
        # Images are assumed to be float in [0, 1] (rescale=1/255 upstream) —
        # scaled back to uint8 for display.  TODO confirm for other callers.
        ax.imshow((image_list[i] * 255).astype(np.uint8), cmap='Greys_r')
        ax.axis('off')
        if show_labels and label_list is not None:
            # Bug fix: previously read the module-global y_int and ignored the
            # label_list parameter entirely. class_mapping is still a module
            # global mapping class index -> class name.
            ax.set_title(class_mapping[label_list[i]])
    if savename is not None:
        plt.savefig(savename, bbox_inches='tight')
# Defining Image augmentors
# Keras preprocessing layers applied on-GPU inside each model (see the model
# builders below): random rotation of up to +/-0.2 * 2pi and random shifts of
# up to 20% of height/width, reflecting at the borders.
data_augmentation = keras.Sequential([
    layers.experimental.preprocessing.RandomRotation(0.2),
    layers.experimental.preprocessing.RandomTranslation(
        0.2, 0.2, fill_mode='reflect', interpolation='bilinear'),
])
# imgaug augmenters — currently all disabled inside add_aug() below.
aug3 = iaa.MotionBlur(k=5, angle=[-90, 90])
aug4 = iaa.CoarseSaltAndPepper((0, 0.02), size_px=(2, 8))
aug5 = iaa.AdditiveGaussianNoise(scale=(0, 0.05*255))
aug6 = iaa.Sharpen(alpha=(0, 0.05), lightness=1.0)
aug7 = iaa.Dropout(p=(0, 0.05))
aug8 = iaa.LinearContrast((0.9, 1.0))
def add_aug(image):
    """Per-image preprocessing hook for ImageDataGenerator.

    Currently an identity pass-through: the imgaug pipeline (aug3..aug8) is
    switched off.  Uncomment individual lines below to re-enable augmenters.
    """
    # image = aug3.augment_image(image)
    # image = aug4.augment_image(image)
    # image = aug5.augment_image(image)
    # image = aug6.augment_image(image)
    # image = aug7.augment_image(image)
    # image = aug8.augment_image(image)
    return image
# Defining the three models in functions
def simpleModel():
    """Build and compile a small 3-conv-block CNN baseline.

    Inputs are augmented on-GPU by the shared `data_augmentation` pipeline;
    rescaling is handled by the data generator, hence the commented layer.
    Output is `num_classes` raw logits (loss uses from_logits=True).
    """
    model = Sequential()
    model.add(data_augmentation)
    # model.add(layers.experimental.preprocessing.Rescaling(1./255))
    for n_filters in (16, 32, 64):
        model.add(layers.Conv2D(n_filters, 3, padding='same', activation='relu'))
        model.add(layers.MaxPooling2D())
    model.add(layers.Dropout(0.2))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(num_classes))
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
def AlexNet():  # batch 32, 224, 0.002, 0.01, True
    """Build and compile an AlexNet-style CNN with batch normalization.

    Same augmentation front-end as the other builders; emits `num_classes`
    logits. Optimizer: SGD(lr=0.001, momentum=0.01, nesterov).
    """
    model = Sequential()
    model.add(data_augmentation)
    # model.add(layers.experimental.preprocessing.Rescaling(1./255))
    # Stage 1: large-kernel stem + norm + overlapping pool.
    model.add(layers.Conv2D(96, 11, strides=4, padding='valid', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=3, strides=2))
    # Stage 2.
    model.add(layers.Conv2D(256, 5, strides=1, padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=3, strides=2))
    # Stage 3: three conv layers, each batch-normalized, then a final pool.
    for n_filters in (384, 384, 256):
        model.add(layers.Conv2D(n_filters, 3, strides=1, padding='same', activation='relu'))
        model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=3, strides=2))
    # Classifier head.
    model.add(layers.Flatten())
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes))
    opt = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.01, nesterov=True)
    model.compile(optimizer=opt,
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
def VGG16():  # batch 64, 224, 0.002, 0.01, True
    """Build and compile a VGG16-style CNN (13 conv + 3 dense layers).

    Conv stacks are generated from (filters, repetitions) pairs; each stack
    ends with a 2x2 max-pool (stride defaults to pool_size, which is OK).
    Emits `num_classes` logits; SGD(lr=0.02, momentum=0.2, nesterov).
    """
    model = Sequential()
    model.add(data_augmentation)
    # model.add(layers.experimental.preprocessing.Rescaling(1./255))
    for n_filters, reps in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(reps):
            model.add(layers.Conv2D(n_filters, 3, strides=1, padding='same', activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Flatten())
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dense(num_classes))
    opt = tf.keras.optimizers.SGD(learning_rate=0.02, momentum=0.2, nesterov=True)
    model.compile(optimizer=opt,
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
# ---- Data pipeline configuration ----
num_classes = 7          # department-store categories (see class_indices print below)
batch_size = 64
img_height = 227 # 180, 227, 224
img_width = 227 # 180, 227, 224
data_dir = pathlib.Path('takaFrames_dataset/')
#image_count = len(list(data_dir.glob('*/*.jpg')))
#print(image_count)
print("\n")
# Pixels rescaled to [0, 1]; 10% of the data held out for validation;
# add_aug is currently an identity hook (see above).
datagen_args = dict(rescale=1./255, validation_split=0.1, preprocessing_function=add_aug)
datagen = ImageDataGenerator(**datagen_args)
# Training and validation generators drawn from the same directory via
# the shared validation_split — subsets must come from the same datagen.
train_ds = datagen.flow_from_directory(
    data_dir,
    subset="training",
    target_size=(img_height, img_width),
    batch_size=batch_size)
print("\n")
val_ds = datagen.flow_from_directory(
    data_dir,
    subset="validation",
    target_size=(img_height, img_width),
    batch_size=batch_size)
print("\n")
print(train_ds.class_indices)
# Invert name -> index into index -> name for labeling plots.
class_mapping = {v:k for k,v in train_ds.class_indices.items()}
# Pull one batch to sanity-check shapes: x is (batch, H, W, 3), y is one-hot.
x,y = next(train_ds)
print('x: ',type(x))
print('y: ',type(y))
print('x: ',x.shape)
print('y: ',y.shape)
print("\n")
# Convert one-hot labels to integer class ids for display.
y_int = np.argmax(y,axis=-1)
show_grid(x,4,8,label_list=y_int,show_labels=True,figsize=(20,10))
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1', '/job:localhost/replica:0/task:0/device:GPU:2', '/job:localhost/replica:0/task:0/device:GPU:3')
Number of devices: 4
Found 43694 images belonging to 7 classes.
Found 4851 images belonging to 7 classes.
{'Children': 0, 'Cosmetics': 1, 'FashionAccessories': 2, 'Household': 3, 'Ladies': 4, 'Men': 5, 'Sports': 6}
x: <class 'numpy.ndarray'>
y: <class 'numpy.ndarray'>
x: (64, 227, 227, 3)
y: (64, 7)
# Build the model inside the distribution scope so its variables are
# mirrored across all GPUs.
with strategy.scope():
    model = AlexNet()
# Keep only the best weights (by validation accuracy) and stop after 5
# epochs without improvement.
checkpoint = ModelCheckpoint("checkpoint.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=5, verbose=1, mode='auto')
epochs = 15
# Bug fix: validation_steps=5 evaluated only 5 * batch_size = 320 of the
# ~4851 validation images per epoch, so the val_accuracy driving both the
# checkpoint and early stopping was extremely noisy (see the 0.76 -> 0.50
# -> 0.25 swings in the captured log). Dropping it evaluates the full
# validation generator each epoch.
history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=epochs,  # 100 seems to be enough to achieve >90% accuracies
                    callbacks=[checkpoint, early]
                    )
# Plotting results: training/validation accuracy and loss per epoch.
plt.plot(history.history["accuracy"])
plt.plot(history.history['val_accuracy'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
# Fix: the old title claimed "model accuracy" although loss curves are
# plotted on the same axes; labels/legend made consistent.
plt.title("Training history")
plt.ylabel("Accuracy / Loss")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()
WARNING:tensorflow:Using MirroredStrategy eagerly has significant overhead currently. We will be working on improving this in the future, but for now please wrap `call_for_each_replica` or `experimental_run` or `experimental_run_v2` inside a tf.function to get the best performance.
Epoch 1/15
WARNING:tensorflow:From /home/mdl-ws/environments/env0/lib/python3.8/site-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
INFO:tensorflow:batch_all_reduce: 26 all-reduces with algorithm = nccl, num_packs = 1
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:batch_all_reduce: 26 all-reduces with algorithm = nccl, num_packs = 1
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
683/683 [==============================] - ETA: 0s - loss: 2.0447 - accuracy: 0.3606INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
Epoch 00001: val_accuracy improved from -inf to 0.53438, saving model to checkpoint.h5
683/683 [==============================] - 124s 181ms/step - loss: 2.0447 - accuracy: 0.3606 - val_loss: 1.3394 - val_accuracy: 0.5344
Epoch 2/15
683/683 [==============================] - ETA: 0s - loss: 1.3052 - accuracy: 0.5321
Epoch 00002: val_accuracy did not improve from 0.53438
683/683 [==============================] - 121s 177ms/step - loss: 1.3052 - accuracy: 0.5321 - val_loss: 1.5041 - val_accuracy: 0.5125
Epoch 3/15
683/683 [==============================] - ETA: 0s - loss: 1.0406 - accuracy: 0.6241
Epoch 00003: val_accuracy improved from 0.53438 to 0.57812, saving model to checkpoint.h5
683/683 [==============================] - 123s 179ms/step - loss: 1.0406 - accuracy: 0.6241 - val_loss: 1.0707 - val_accuracy: 0.5781
Epoch 4/15
683/683 [==============================] - ETA: 0s - loss: 0.8648 - accuracy: 0.6864
Epoch 00004: val_accuracy improved from 0.57812 to 0.75937, saving model to checkpoint.h5
683/683 [==============================] - 122s 178ms/step - loss: 0.8648 - accuracy: 0.6864 - val_loss: 0.6990 - val_accuracy: 0.7594
Epoch 5/15
683/683 [==============================] - ETA: 0s - loss: 0.7324 - accuracy: 0.7346
Epoch 00005: val_accuracy did not improve from 0.75937
683/683 [==============================] - 122s 178ms/step - loss: 0.7324 - accuracy: 0.7346 - val_loss: 1.3632 - val_accuracy: 0.5625
Epoch 6/15
683/683 [==============================] - ETA: 0s - loss: 0.6337 - accuracy: 0.7742
Epoch 00006: val_accuracy did not improve from 0.75937
683/683 [==============================] - 121s 177ms/step - loss: 0.6337 - accuracy: 0.7742 - val_loss: 2.3112 - val_accuracy: 0.5000
Epoch 7/15
683/683 [==============================] - ETA: 0s - loss: 0.5461 - accuracy: 0.8058
Epoch 00007: val_accuracy did not improve from 0.75937
683/683 [==============================] - 121s 178ms/step - loss: 0.5461 - accuracy: 0.8058 - val_loss: 0.8977 - val_accuracy: 0.6906
Epoch 8/15
683/683 [==============================] - ETA: 0s - loss: 0.4622 - accuracy: 0.8373
Epoch 00008: val_accuracy did not improve from 0.75937
683/683 [==============================] - 119s 175ms/step - loss: 0.4622 - accuracy: 0.8373 - val_loss: 1.0142 - val_accuracy: 0.6844
Epoch 9/15
683/683 [==============================] - ETA: 0s - loss: 0.3905 - accuracy: 0.8636
Epoch 00009: val_accuracy did not improve from 0.75937
683/683 [==============================] - 121s 178ms/step - loss: 0.3905 - accuracy: 0.8636 - val_loss: 4.4516 - val_accuracy: 0.2469
Epoch 00009: early stopping
# Testing the generalisability of the Takashimaya Ngee Ann City trained model
# against images from Isetan Shaw Departmental store.
test_dir = pathlib.Path('Isetan45_testSet/')
# Fix: image_dataset_from_directory decoded all ~48k training images just to
# read class names. class_names is simply the sorted subdirectory names.
class_names = sorted(entry.name for entry in os.scandir('takaFrames_dataset/') if entry.is_dir())
imgs_only = os.listdir(test_dir)
for img_name in imgs_only:
    img_path = os.path.join(test_dir, img_name)
    print(img_path)
    # Fix: load_img's target_size is (height, width); (img_width, img_height)
    # was passed — harmless while both are 227, but wrong in general.
    img = keras.preprocessing.image.load_img(img_path, target_size=(img_height, img_width))
    plt.imshow(img)
    arr = keras.preprocessing.image.img_to_array(img)
    # BUG FIX: the training generator rescaled pixels by 1/255, but inference
    # fed raw 0-255 values — the model saw inputs 255x larger than it was
    # trained on, producing the nonsense 100%-confidence predictions in the
    # captured output. Normalize identically to training.
    arr = arr / 255.0
    img_array = tf.expand_dims(arr, 0)
    predictions = model.predict(img_array)
    # Logits -> probabilities (models are compiled with from_logits=True).
    score = tf.nn.softmax(predictions[0])
    plt.show()
    print("Likely {} with {:.2f}% confidence.\n\n".format(class_names[np.argmax(score)], 100 * np.max(score)))
Found 48545 files belonging to 7 classes. Isetan45_testSet/Cosmetics_3.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Accesories_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Men_1.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Children_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Cosmetics_2.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Household_1.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Household_3.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Sports_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Cosmetics_1.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Sports_1.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Men_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Ladies_2.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_3.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Accesories_2.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Sports_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Household_2.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Sports_2.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Cosmetics_4.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Ladies_4.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Accesories_3.JPG
Likely Children with 98.49% confidence. Isetan45_testSet/Household_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Cosmetics_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_1.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_2.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Household_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Ladies_3.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Sports_3.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Ladies_5.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Accesories_5.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Ladies_1.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Men_3.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Men_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Accesories_1.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Men_2.JPG
Likely Ladies with 100.00% confidence.